
Usage and code examples of the edu.illinois.cs.cogcomp.sl.util.WeightVector class

Reposted. Author: 知者. Updated: 2024-03-25 04:57:05

This article collects Java code examples for the edu.illinois.cs.cogcomp.sl.util.WeightVector class and shows how the class is used in practice. The snippets were extracted from selected projects found on GitHub, Stack Overflow, Maven, and similar sources, so they should serve as useful references. Details of the WeightVector class:
Package: edu.illinois.cs.cogcomp.sl.util
Class: WeightVector

About WeightVector

The weight vector.
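
Before the collected snippets, here is a minimal, self-contained sketch that exercises only the WeightVector methods appearing in the examples below (the size constructor, the copy constructor, setElement, get, getLength, getSquareL2Norm, addDenseVector, and scale). The class name WeightVectorDemo, the vector size, and the values are arbitrary and purely illustrative.

import edu.illinois.cs.cogcomp.sl.util.WeightVector;

public class WeightVectorDemo {
  public static void main(String[] args) {
    // A dense weight vector with 10 slots; in illinois-sl, index 0 is commonly reserved for the bias term.
    WeightVector wv = new WeightVector(10);
    wv.setElement(1, 0.5f);
    wv.setElement(3, -2.0f);

    // Duplicate the vector, add the copy back in, then rescale to recover the original values.
    WeightVector copy = new WeightVector(wv);
    wv.addDenseVector(copy); // wv now holds twice the original weights
    wv.scale(0.5);           // back to the original weights

    System.out.println("length  = " + wv.getLength());
    System.out.println("w[3]    = " + wv.get(3));
    System.out.println("||w||^2 = " + wv.getSquareL2Norm());
  }
}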

Code examples

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

/***
 * Computes wv^T\phi(x,y).
 * Override this function if you have a faster implementation for computing
 * wv^T\phi(x,y).
 * @param wv the weight vector
 * @param x the input instance
 * @param y the output structure
 * @return the value of wv^T\phi(x,y)
 */
public float decisionValue(WeightVector wv, IInstance x, IStructure y) {
  return wv.dotProduct(getFeatureVector(x, y));
}

Code example source: edu.illinois.cs.cogcomp/illinois-srl

public static WeightVector load(String fileName) {
  try {
    // Model format: a "WeightVector" header line, then the vector size, then one index:value pair per line.
    GZIPInputStream zipin = new GZIPInputStream(new FileInputStream(fileName));
    BufferedReader reader = new BufferedReader(new InputStreamReader(zipin));
    String line;
    line = reader.readLine().trim();
    if (!line.equals("WeightVector")) {
      reader.close();
      throw new IOException("Invalid model file.");
    }
    line = reader.readLine().trim();
    int size = Integer.parseInt(line);
    WeightVector w = new WeightVector(size);
    while ((line = reader.readLine()) != null) {
      line = line.trim();
      String[] parts = line.split(":");
      int index = Integer.parseInt(parts[0]);
      float value = Float.parseFloat(parts[1]);
      w.setElement(index, value);
    }
    zipin.close();
    return w;
  } catch (Exception e) {
    log.error("Error loading model file {}", fileName);
    System.exit(-1);
  }
  return null;
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

public void run(){
    // Collect results: average the per-thread weight vectors, then push the average back to every thread.
    wv.empty();
    for (int i = 0; i < n_thread; i++) {
      wv.addDenseVector(inf_runner_list[i].wv);
    }
    wv.scale(1.0 / (double) n_thread);
    for (int i = 0; i < n_thread; i++) {
      inf_runner_list[i].setWv(wv);
    }
  }
});

Code example source: edu.illinois.cs.cogcomp/illinois-sl

public static void printSparsity(WeightVector wv) {
  int nonZero = 0;
  System.out.println("SIZE: " + wv.getLength());
  // count the non-zero entries of the internal weight array
  for (float f : wv.getInternalArray()) {
    if (f != 0.0)
      nonZero++;
  }
  System.out.println("NZ values: " + nonZero);
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl

@Override
public void run(WeightVector w, AbstractInferenceSolver inference)
    throws Exception {
  // snapshot the current weights so intermediate models can be evaluated later
  float[] array = new float[w.getInternalArray().length];
  for (int i = 0; i < array.length; i++)
    array[i] = w.getInternalArray()[i];

  WeightVector wv = new WeightVector(array);
  wvList.add(wv);
  runningTime.add(System.currentTimeMillis() - startTime);
}

Code example source: edu.illinois.cs.cogcomp/IllinoisSL-core

public InferenceThread(
    AbstractInferenceSolver infSolver,
    StructuredInstanceWithAlphas[] subset, WeightVector wv, int threadId, Parameters parameters) {
  this.infSolver = infSolver;
  this.alphaInsList = subset;
  this.threadId = threadId;
  this.wv = new WeightVector(wv);
  this.parameters = parameters;
  logger.trace("Thread:" + threadId + " handles "
        + subset.length + " instances!");
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

/**
 * The function users call to train the structured SVM.
 * 
 * @param sp
 *            structured labeled dataset
 * @param params
 *            learning parameters
 * @return the learned weight vector
 * @throws Exception
 */
@Override
public WeightVector train(final SLProblem sp, SLParameters params) throws Exception {
  WeightVector wv = null;
  
  // +1 because wv.u[0] stores the bias term
  if(params.TOTAL_NUMBER_FEATURE >0){
    wv = new WeightVector(params.TOTAL_NUMBER_FEATURE + 1);
    wv.setExtendable(false);
  } else {
    wv = new WeightVector(8192);
    wv.setExtendable(true);
  }
  return train(sp,params,wv);
}
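
As the comment in train notes, the weight vector gets one extra slot because index 0 stores the bias term. A hedged configuration sketch follows; numFeatures is a placeholder value, and SLParameters is the parameter object the method above receives.

SLParameters params = new SLParameters();
int numFeatures = 50000;                    // placeholder: the size of your feature space, if known up front
params.TOTAL_NUMBER_FEATURE = numFeatures;  // train() then allocates numFeatures + 1 slots and marks the vector non-extendable
// Leaving TOTAL_NUMBER_FEATURE non-positive instead starts from an 8192-slot extendable vector.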

Code example source: edu.illinois.cs.cogcomp/illinois-sl

// project the updated dual variable to be non-negative, then apply the change to the weights
float dot_product = wv.dotProduct(fv);
float xij_norm2 = fv.getSquareL2Norm();
float new_alpha = Math.max(alpha + step, 0);
alphaSum += (new_alpha - alpha);
wv.addSparseFeatureVector(fv, (new_alpha - alpha));
as.alpha = new_alpha;

Code example source: edu.illinois.cs.cogcomp/illinois-sl

// position 0: prior score plus the loss-augmented emission score
float priorScore = wv.get(numOfEmissionFeatures * numOflabels + j);
float zeroOrderScore = wv.dotProduct(seq.baseFeatures[0], j * numOfEmissionFeatures)
    + ((gold != null && j != goldLabeledSeq.tags[0]) ? 1 : 0);
dpTable[0][j] = priorScore + zeroOrderScore;

// position i > 0: emission score plus the best transition from the previous label k
float zeroOrderScore = wv.dotProduct(seq.baseFeatures[i], j * numOfEmissionFeatures)
    + ((gold != null && j != goldLabeledSeq.tags[i]) ? 1 : 0);
float candidateScore = dpTable[(i - 1) % 2][k] + wv.get(offset + (k * numOflabels + j));
if (candidateScore > bestScore) {
  bestScore = candidateScore;

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

WeightVector avg = new WeightVector(10000);
WeightVector w = init;
// ... (lines elided in the original snippet)
WeightVector a = new WeightVector(w);
a.addDenseVector(avg, -1.0f / (count));

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

// regularization shrinkage, then a step along the gold-minus-predicted feature difference
IFeatureVector predictedFeatures = featureGenerator.getFeatureVector(example, prediction);
IFeatureVector update = goldFeatures.difference(predictedFeatures);
double loss_term = loss - w.dotProduct(update);
w.scale(1.0f - learningRate);
w.addSparseFeatureVector(update, 2 * learningRate * params.C_FOR_STRUCTURE * loss_term);

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

      struct_finder_list[i], featureGenerator, para);
  inf_runner_list[i] = new StructPerceptronHandler(spLearner,
      subProbs.get(i), new WeightVector(10000), para);
final WeightVector wv = new WeightVector(10000);
barrier = new CyclicBarrier(n_thread, new Runnable() {
  public void run() {
    wv.empty();
    for (int i = 0; i < n_thread; i++) {
      wv.addDenseVector(inf_runner_list[i].wv);
    wv.scale(1.0 / (double) n_thread);

Code example source: edu.illinois.cs.cogcomp/illinois-sl

StructPerceptronHandler[] inf_runner_list = new StructPerceptronHandler[n_thread];
WeightVector wv = new WeightVector(10000);
long startTime = System.currentTimeMillis();
long trainTime = 0;
        struct_finder_list[i], featureGenerator, para);
    inf_runner_list[i] = new StructPerceptronHandler(spLearner,
        subProbs.get(i), new WeightVector(wv, 0), para);
  wv = new WeightVector(10000);
  for (int i = 0; i < n_thread; i++) {
    wv.addDenseVector(inf_runner_list[i].wv);
  wv.scale(1.0 / (double) n_thread);
  trainTime = System.currentTimeMillis() - startTime;

Code example source: edu.illinois.cs.cogcomp/IllinoisSL-core

/**
 * Get the primal objective function value with respect to the weight vector wv.
 * @param sp the structured problem (instances plus gold structures)
 * @param wv the weight vector
 * @param infSolver the loss-augmented inference solver
 * @param C the regularization parameter
 * @return the primal objective value
 * @throws Exception
 */
public static float getPrimalObjective(
    StructuredProblem sp, WeightVector wv,
    AbstractInferenceSolver infSolver, float C) throws Exception {
  float obj = 0;
  obj += wv.getSquareL2Norm() * 0.5;
  List<IInstance> input_list = sp.instanceList;
  List<IStructure> output_list = sp.goldStructureList;
  for (int i = 0; i < input_list.size(); i++) {
    IInstance ins = input_list.get(i);
    IStructure gold_struct = output_list.get(i);
    float sC= C;
    Pair<IStructure, Float > res = infSolver
        .getLossAugmentedBestStructure(wv, ins, gold_struct);
    float loss = res.getSecond()
        + wv.dotProduct(res.getFirst().getFeatureVector())
        - wv.dotProduct(gold_struct.getFeatureVector());
    obj += sC * loss * loss;
  }
  return obj;
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

public void setWv(WeightVector wv){
    this.wv.setDenseVector(wv);
  }
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl

@Deprecated
protected void fillWeightVector(WeightVector w) {
  for(AlphaStruct as: candidateAlphas){			
    w.addSparseFeatureVector(as.alphaFeactureVector, as.alpha);
  }
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl

protected static float getDualObjective(
    StructuredInstanceWithAlphas[] alphaInsList, WeightVector wv) {
  float obj = 0;
  obj += wv.getSquareL2Norm() * 0.5;
  for (int i = 0; i < alphaInsList.length; i++) {
    StructuredInstanceWithAlphas instanceWithAlphas = alphaInsList[i];
    float w_sum = instanceWithAlphas.getLossWeightAlphaSum();
    float sum = instanceWithAlphas.alphaSum;
    float C = instanceWithAlphas.getC();
    obj -= w_sum;
    obj += (1.0 / (4.0 * C)) * sum * sum;
  }
  return obj;
}

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

/**
 * Copy constructor: duplicate a weight vector.
 * @param wv the weight vector to copy
 */
public WeightVector(WeightVector wv) {
  float[] in = wv.getInternalArray();
  u = new float[in.length];
  System.arraycopy(in, 0, u, 0, in.length);
  size = in.length;
}

Code example source: edu.illinois.cs.cogcomp/illinois-verbsense

public static void save(String fileName, WeightVector wv) throws IOException {
    // Model format: a "WeightVector" header line, the array length, then one index:value pair per non-zero weight.
  BufferedOutputStream stream =
      new BufferedOutputStream(new GZIPOutputStream(new FileOutputStream(fileName)));
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(stream));
  float[] w = wv.getWeightArray();
  writer.write("WeightVector");
  writer.newLine();
  writer.write(w.length + "");
  writer.newLine();
  int numNonZero = 0;
  for (int index = 0; index < w.length; index++) {
    if (w[index] != 0) {
      writer.write(index + ":" + w[index]);
      writer.newLine();
      numNonZero++;
    }
  }
  writer.close();
  log.info("Number of non zero weights: " + numNonZero);
}
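
The save helper above (from illinois-verbsense) writes the same line-oriented, gzip-compressed text format that the load example earlier in this article (from illinois-srl) parses: a "WeightVector" header, the vector length, then index:value pairs for the non-zero weights. A hedged round-trip sketch follows, assuming both static helpers are available on the classpath; ModelIO is a placeholder name, since the snippets do not show the enclosing classes.

WeightVector wv = new WeightVector(100);
wv.setElement(7, 1.25f);
ModelIO.save("weights.gz", wv);                      // header, length, then index:value pairs
WeightVector restored = ModelIO.load("weights.gz");
System.out.println(restored.get(7));                 // prints 1.25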

Code example source: edu.illinois.cs.cogcomp/illinois-sl-core

w.scale(1.0f-learningRate);
w.addSparseFeatureVector(update, 2*learningRate*params.C_FOR_STRUCTURE);
